kaf24@camelot.eng.3leafnetworks.com
kaf24@freefall.cl.cam.ac.uk
kaf24@labyrinth.cl.cam.ac.uk
+kaf24@pb001.cl.cam.ac.uk
kaf24@penguin.local
kaf24@plym.cl.cam.ac.uk
kaf24@scramble.cl.cam.ac.uk
OBJS := $(subst $(TARGET_SUBARCH)/asm-offsets.o,,$(OBJS))
+ifneq ($(TARGET_SUBARCH),i386)
+OBJS := $(subst vmx.o,,$(OBJS))
+OBJS := $(subst vmx_io.o,,$(OBJS))
+OBJS := $(subst vmx_vmcs.o,,$(OBJS))
+endif # ($(TARGET_SUBARCH),i386)
+
default: boot/$(TARGET_SUBARCH).o $(OBJS) boot/mkelf32
$(LD) $(LDFLAGS) -r -o arch.o $(OBJS)
$(LD) $(LDFLAGS) -T $(TARGET_SUBARCH)/xen.lds -N \
unmap_domain_mem:
ret_from_intr:
#undef machine_to_phys_mapping
+#undef phys_to_machine_mapping
.globl copy_to_user, set_intr_gate, die, machine_to_phys_mapping
+.globl phys_to_machine_mapping
copy_to_user:
set_intr_gate:
die:
machine_to_phys_mapping:
+phys_to_machine_mapping:
.globl copy_from_user, show_registers, do_iopl
copy_from_user:
show_registers:
__sti();
}
-static void idle_loop(void)
+void idle_loop(void)
{
int cpu = smp_processor_id();
for ( ; ; )
{
struct pfn_info *spfn_info;
unsigned long spfn;
- l2_pgentry_t *spl2e = 0, *gpl2e;
+ l2_pgentry_t *spl2e = 0;
unsigned long guest_gpfn;
__get_machine_to_phys(m, guest_gpfn, gpfn);
#ifdef __i386__
/* Install hypervisor and 2x linear p.t. mapings. */
- if (m->shadow_mode == SHM_full_32)
+ if ( m->shadow_mode == SHM_full_32 )
+ {
vmx_update_shadow_state(m, gpfn, spfn);
- else {
+ }
+ else
+ {
spl2e = (l2_pgentry_t *)map_domain_mem(spfn << PAGE_SHIFT);
- // can't use the linear map as we may not be in the right PT
- gpl2e = (l2_pgentry_t *) map_domain_mem(gpfn << PAGE_SHIFT);
/*
- * We could proactively fill in PDEs for pages that are already shadowed.
- * However, we tried it and it didn't help performance. This is simpler.
+ * We could proactively fill in PDEs for pages that are already
+ * shadowed. However, we tried it and it didn't help performance.
+ * This is simpler.
*/
- memset(spl2e, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
+ memset(spl2e, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE*sizeof(l2_pgentry_t));
/* Install hypervisor and 2x linear p.t. mapings. */
memcpy(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
}
#endif
- if (m->shadow_mode != SHM_full_32)
- {
+ if ( m->shadow_mode != SHM_full_32 )
unmap_domain_mem(spl2e);
- }
SH_VLOG("shadow_l2_table( %08lx -> %08lx)", gpfn, spfn);
return spfn;
OFFSET(XREGS_ss, struct xen_regs, ss);
BLANK();
- OFFSET(DOMAIN_processor, struct domain, processor);
- OFFSET(DOMAIN_shared_info, struct domain, shared_info);
- OFFSET(DOMAIN_event_sel, struct domain, thread.event_selector);
- OFFSET(DOMAIN_event_addr, struct domain, thread.event_address);
- OFFSET(DOMAIN_failsafe_sel, struct domain, thread.failsafe_selector);
- OFFSET(DOMAIN_failsafe_addr, struct domain, thread.failsafe_address);
- OFFSET(DOMAIN_trap_bounce, struct domain, thread.trap_bounce);
- OFFSET(DOMAIN_thread_flags, struct domain, thread.flags);
+ OFFSET(EDOMAIN_processor, struct exec_domain, processor);
+ OFFSET(EDOMAIN_vcpu_info, struct exec_domain, vcpu_info);
+ OFFSET(EDOMAIN_event_sel, struct exec_domain, thread.event_selector);
+ OFFSET(EDOMAIN_event_addr, struct exec_domain, thread.event_address);
+ OFFSET(EDOMAIN_failsafe_sel, struct exec_domain, thread.failsafe_selector);
+ OFFSET(EDOMAIN_failsafe_addr, struct exec_domain, thread.failsafe_address);
+ OFFSET(EDOMAIN_trap_bounce, struct exec_domain, thread.trap_bounce);
+ OFFSET(EDOMAIN_thread_flags, struct exec_domain, thread.flags);
BLANK();
OFFSET(SHINFO_upcall_pending, shared_info_t,
" .quad 1b,2b\n"
".previous"
: [size8] "=c"(size), [dst] "=&D" (__d0)
- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst] "(addr),
+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
[zero] "r" (0UL), [eight] "r" (8UL));
return size;
}
* A Linux-style configuration list.
*/
-#ifndef __XEN_I386_CONFIG_H__
-#define __XEN_I386_CONFIG_H__
+#ifndef __X86_CONFIG_H__
+#define __X86_CONFIG_H__
+#ifdef __i386__
#define CONFIG_VMX 1
+#endif
+
#define CONFIG_X86 1
#define CONFIG_SMP 1
#define ELFSIZE 32
#endif
-#endif /* __XEN_I386_CONFIG_H__ */
+#endif /* __X86_CONFIG_H__ */
* contiguous (or near contiguous) physical memory.
*/
#undef machine_to_phys_mapping
+
/*
* The phys_to_machine_mapping is the reversed mapping of MPT for full
* virtualization.
#ifdef __x86_64__
extern unsigned long *machine_to_phys_mapping;
+extern unsigned long *phys_to_machine_mapping;
#else
#define machine_to_phys_mapping ((unsigned long *)RDWR_MPT_VIRT_START)
-#ifdef CONFIG_VMX
#define phys_to_machine_mapping ((unsigned long *)PERDOMAIN_VIRT_START)
#endif
-#endif
#define DEFAULT_GDT_ENTRIES (LAST_RESERVED_GDT_ENTRY+1)
#define DEFAULT_GDT_ADDRESS ((unsigned long)gdt_table)
unsigned long kernelstack; /* TOS for current process */
unsigned long oldrsp; /* user rsp for system call */
unsigned long irqrsp; /* Old rsp for interrupts. */
- struct domain *pcurrent; /* Current process */
+ struct exec_domain *pcurrent; /* Current process */
int irqcount; /* Irq nesting counter. Starts with -1 */
int cpunumber; /* Logical CPU number */
char *irqstackptr; /* top of irqstack */
l1_pgentry_t *perdomain_ptes;
pagetable_t pagetable;
-#ifdef CONFIG_VMX
-
-#define SHM_full_32 (8) /* full virtualization for 32-bit */
-
- pagetable_t monitor_table;
- l2_pgentry_t *vpagetable; /* virtual address of pagetable */
- l2_pgentry_t *shadow_vtable; /* virtual address of shadow_table */
- l2_pgentry_t *guest_pl2e_cache; /* guest page directory cache */
- unsigned long min_pfn; /* min host physical */
- unsigned long max_pfn; /* max host physical */
-#endif
+ pagetable_t monitor_table;
+ l2_pgentry_t *vpagetable; /* virtual address of pagetable */
+ l2_pgentry_t *shadow_vtable; /* virtual address of shadow_table */
+ l2_pgentry_t *guest_pl2e_cache; /* guest page directory cache */
+ unsigned long min_pfn; /* min host physical */
+ unsigned long max_pfn; /* max host physical */
/* shadow mode status and controls */
unsigned int shadow_mode; /* flags to control shadow table operation */
#define STACK_RESERVED \
(sizeof(execution_context_t) + sizeof(struct domain *))
-static inline struct exec_domain * get_current(void)
+static inline struct exec_domain *get_current(void)
{
- struct exec_domain *current;
+ struct exec_domain *ed;
__asm__ ( "orl %%esp,%0; andl $~3,%0; movl (%0),%0"
- : "=r" (current) : "0" (STACK_SIZE-4) );
- return current;
+ : "=r" (ed) : "0" (STACK_SIZE-4) );
+ return ed;
}
#define current get_current()
-static inline void set_current(struct exec_domain *p)
+static inline void set_current(struct exec_domain *ed)
{
__asm__ ( "orl %%esp,%0; andl $~3,%0; movl %1,(%0)"
- : : "r" (STACK_SIZE-4), "r" (p) );
+ : : "r" (STACK_SIZE-4), "r" (ed) );
}
static inline execution_context_t *get_execution_context(void)
#define STACK_RESERVED \
(sizeof(execution_context_t))
-static inline struct domain * get_current(void)
+static inline struct exec_domain *get_current(void)
{
- struct domain *current;
- current = read_pda(pcurrent);
- return current;
+ struct exec_domain *ed;
+ ed = read_pda(pcurrent);
+ return ed;
}
#define current get_current()
-static inline void set_current(struct domain *p)
+static inline void set_current(struct exec_domain *ed)
{
- write_pda(pcurrent,p);
+ write_pda(pcurrent, ed);
}
static inline execution_context_t *get_execution_context(void)